#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic locking declarations
 */

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

#include <asm/processor.h>    /* for cpu_relax() */
#include <asm/system.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME                       \
        ".text.lock." __stringify(KBUILD_BASENAME)

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

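/*
 * Illustrative sketch only (not part of this header): arch spinlock
 * code typically uses these macros in inline asm to move the contended
 * spin loop out of line, roughly along these lines for an x86-style
 * lock byte:
 *
 *    "\n1:\t"
 *    "lock ; decb %0\n\t"
 *    "js 2f\n"
 *    LOCK_SECTION_START("")
 *    "2:\t"
 *    "rep;nop\n\t"
 *    "cmpb $0,%0\n\t"
 *    "jle 2b\n\t"
 *    "jmp 1b\n"
 *    LOCK_SECTION_END
 *
 * The uncontended fast path falls straight through; the "2:" spin loop
 * is emitted in the .text.lock.* subsection and only jumps back to
 * retry once the lock byte looks free again.
 */

/*
 * __lockfunc places the out-of-line lock functions in their own
 * .spinlock.text section, which is what lets in_lock_functions()
 * recognize their addresses.
 */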
#define __lockfunc fastcall __attribute__((section(".spinlock.text")))

/*
 * If CONFIG_SMP is set, pull in the _raw_* definitions
 */
#ifdef CONFIG_SMP

#define assert_spin_locked(x)    BUG_ON(!spin_is_locked(x))
#include <asm/spinlock.h>

int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);

void __lockfunc _spin_lock(spinlock_t *lock)    __acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock)    __acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock)    __acquires(rwlock_t);

void __lockfunc _spin_unlock(spinlock_t *lock)    __releases(spinlock_t);
void __lockfunc _read_unlock(rwlock_t *lock)    __releases(rwlock_t);
void __lockfunc _write_unlock(rwlock_t *lock)    __releases(rwlock_t);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)    __acquires(spinlock_t);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)    __acquires(rwlock_t);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)    __acquires(rwlock_t);

void __lockfunc _spin_lock_irq(spinlock_t *lock)    __acquires(spinlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock)        __acquires(spinlock_t);
void __lockfunc _read_lock_irq(rwlock_t *lock)        __acquires(rwlock_t);
void __lockfunc _read_lock_bh(rwlock_t *lock)        __acquires(rwlock_t);
void __lockfunc _write_lock_irq(rwlock_t *lock)        __acquires(rwlock_t);
void __lockfunc _write_lock_bh(rwlock_t *lock)        __acquires(rwlock_t);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)    __releases(spinlock_t);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)                __releases(spinlock_t);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)                __releases(spinlock_t);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)    __releases(rwlock_t);
void __lockfunc _read_unlock_irq(rwlock_t *lock)                __releases(rwlock_t);
void __lockfunc _read_unlock_bh(rwlock_t *lock)                    __releases(rwlock_t);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)    __releases(rwlock_t);
void __lockfunc _write_unlock_irq(rwlock_t *lock)                __releases(rwlock_t);
void __lockfunc _write_unlock_bh(rwlock_t *lock)                __releases(rwlock_t);

int __lockfunc _spin_trylock_bh(spinlock_t *lock);
int __lockfunc generic_raw_read_trylock(rwlock_t *lock);
int in_lock_functions(unsigned long addr);

#else

#define in_lock_functions(ADDR) 0

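/*
 * On UP without CONFIG_PREEMPT or spinlock debugging, the spinlock
 * half of "decrement and lock" is a no-op, so the whole operation
 * reduces to a plain atomic_dec_and_test().
 */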
#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif

#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_MAGIC    0x1D244B3C
typedef struct {
    unsigned long magic;
    volatile unsigned long lock;
    volatile unsigned int babble;
    const char *module;
    char *owner;
    int oline;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}

#define spin_lock_init(x) \
    do { \
        (x)->magic = SPINLOCK_MAGIC; \
        (x)->lock = 0; \
        (x)->babble = 5; \
        (x)->module = __FILE__; \
        (x)->owner = NULL; \
        (x)->oline = 0; \
    } while (0)

#define CHECK_LOCK(x) \
    do { \
        if ((x)->magic != SPINLOCK_MAGIC) { \
            printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
                    __FILE__, __LINE__, (x)); \
        } \
    } while(0)

#define _raw_spin_lock(x)        \
    do { \
        CHECK_LOCK(x); \
        if ((x)->lock&&(x)->babble) { \
            (x)->babble--; \
            printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
                    __FILE__,__LINE__, (x)->module, \
                    (x), (x)->owner, (x)->oline); \
        } \
        (x)->lock = 1; \
        (x)->owner = __FILE__; \
        (x)->oline = __LINE__; \
    } while (0)

/* On UP, spin_is_locked() always returns FALSE, just as the non-debug
 * version does; the debug version additionally printks if the lock is
 * already held. */
#define spin_is_locked(x) \
    ({ \
        CHECK_LOCK(x); \
        if ((x)->lock&&(x)->babble) { \
            (x)->babble--; \
            printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
                    __FILE__,__LINE__, (x)->module, \
                    (x), (x)->owner, (x)->oline); \
        } \
        0; \
    })

/* with debugging, assert_spin_locked() on UP does check
 * the lock value properly */
#define assert_spin_locked(x) \
    ({ \
        CHECK_LOCK(x); \
        BUG_ON(!(x)->lock); \
    })

/* On UP, spin_trylock() always succeeds, just as the non-debug version
 * does; the debug version additionally printks if the lock is already
 * held. */
#define _raw_spin_trylock(x) \
    ({ \
        CHECK_LOCK(x); \
        if ((x)->lock&&(x)->babble) { \
            (x)->babble--; \
            printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
                    __FILE__,__LINE__, (x)->module, \
                    (x), (x)->owner, (x)->oline); \
        } \
        (x)->lock = 1; \
        (x)->owner = __FILE__; \
        (x)->oline = __LINE__; \
        1; \
    })

#define spin_unlock_wait(x)    \
    do { \
        CHECK_LOCK(x); \
        if ((x)->lock&&(x)->babble) { \
            (x)->babble--; \
            printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
                    __FILE__,__LINE__, (x)->module, (x), \
                    (x)->owner, (x)->oline); \
        } \
    } while (0)

#define _raw_spin_unlock(x) \
    do { \
        CHECK_LOCK(x); \
        if (!(x)->lock&&(x)->babble) { \
            (x)->babble--; \
            printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
                    __FILE__,__LINE__, (x)->module, (x)); \
        } \
        (x)->lock = 0; \
    } while (0)
#else
/*
 * gcc versions before ~2.95 have a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2)
  typedef struct { } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } spinlock_t;
  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

/*
 * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
 */
#define spin_lock_init(lock)    do { (void)(lock); } while(0)
#define _raw_spin_lock(lock)    do { (void)(lock); } while(0)
#define spin_is_locked(lock)    ((void)(lock), 0)
#define assert_spin_locked(lock)    do { (void)(lock); } while(0)
#define _raw_spin_trylock(lock)    (((void)(lock), 1))
#define spin_unlock_wait(lock)    (void)(lock)
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
#endif /* CONFIG_DEBUG_SPINLOCK */

/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
  typedef struct { } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
  typedef struct { int gcc_is_buggy; } rwlock_t;
  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

#define rwlock_init(lock)    do { (void)(lock); } while(0)
#define _raw_read_lock(lock)    do { (void)(lock); } while(0)
#define _raw_read_unlock(lock)    do { (void)(lock); } while(0)
#define _raw_write_lock(lock)    do { (void)(lock); } while(0)
#define _raw_write_unlock(lock)    do { (void)(lock); } while(0)
#define read_can_lock(lock)    (((void)(lock), 1))
#define write_can_lock(lock)    (((void)(lock), 1))
#define _raw_read_trylock(lock) ({ (void)(lock); (1); })
#define _raw_write_trylock(lock) ({ (void)(lock); (1); })

/*
 * In the trylock wrappers below, preemption (and, for the _bh variant,
 * bottom-half processing) stays disabled only when the lock is
 * actually acquired.
 */
#define _spin_trylock(lock)    ({preempt_disable(); _raw_spin_trylock(lock) ? \
                1 : ({preempt_enable(); 0;});})

#define _read_trylock(lock)    ({preempt_disable();_raw_read_trylock(lock) ? \
                1 : ({preempt_enable(); 0;});})

#define _write_trylock(lock)    ({preempt_disable(); _raw_write_trylock(lock) ? \
                1 : ({preempt_enable(); 0;});})

#define _spin_trylock_bh(lock)    ({preempt_disable(); local_bh_disable(); \
                _raw_spin_trylock(lock) ? \
                1 : ({preempt_enable(); local_bh_enable(); 0;});})

#define _spin_lock(lock)    \
do { \
    preempt_disable(); \
    _raw_spin_lock(lock); \
    __acquire(lock); \
} while(0)

#define _write_lock(lock) \
do { \
    preempt_disable(); \
    _raw_write_lock(lock); \
    __acquire(lock); \
} while(0)

#define _read_lock(lock)    \
do { \
    preempt_disable(); \
    _raw_read_lock(lock); \
    __acquire(lock); \
} while(0)

#define _spin_unlock(lock) \
do { \
    _raw_spin_unlock(lock); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#define _write_unlock(lock) \
do { \
    _raw_write_unlock(lock); \
    preempt_enable(); \
    __release(lock); \
} while(0)

#define _read_unlock(lock) \
do { \
    _raw_read_unlock(lock); \
    preempt_enable(); \
    __release(lock); \
} while(0)

#define _spin_lock_irqsave(lock, flags) \
do { \
    local_irq_save(flags); \
    preempt_disable(); \
    _raw_spin_lock(lock); \
    __acquire(lock); \
} while (0)

#define _spin_lock_irq(lock) \
do { \
    local_irq_disable(); \
    preempt_disable(); \
    _raw_spin_lock(lock); \
    __acquire(lock); \
} while (0)

#define _spin_lock_bh(lock) \
do { \
    local_bh_disable(); \
    preempt_disable(); \
    _raw_spin_lock(lock); \
    __acquire(lock); \
} while (0)

#define _read_lock_irqsave(lock, flags) \
do { \
    local_irq_save(flags); \
    preempt_disable(); \
    _raw_read_lock(lock); \
    __acquire(lock); \
} while (0)

#define _read_lock_irq(lock) \
do { \
    local_irq_disable(); \
    preempt_disable(); \
    _raw_read_lock(lock); \
    __acquire(lock); \
} while (0)

#define _read_lock_bh(lock) \
do { \
    local_bh_disable(); \
    preempt_disable(); \
    _raw_read_lock(lock); \
    __acquire(lock); \
} while (0)

#define _write_lock_irqsave(lock, flags) \
do { \
    local_irq_save(flags); \
    preempt_disable(); \
    _raw_write_lock(lock); \
    __acquire(lock); \
} while (0)

#define _write_lock_irq(lock) \
do { \
    local_irq_disable(); \
    preempt_disable(); \
    _raw_write_lock(lock); \
    __acquire(lock); \
} while (0)

#define _write_lock_bh(lock) \
do { \
    local_bh_disable(); \
    preempt_disable(); \
    _raw_write_lock(lock); \
    __acquire(lock); \
} while (0)

#define _spin_unlock_irqrestore(lock, flags) \
do { \
    _raw_spin_unlock(lock); \
    local_irq_restore(flags); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#define _spin_unlock_irq(lock) \
do { \
    _raw_spin_unlock(lock); \
    local_irq_enable(); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#define _spin_unlock_bh(lock) \
do { \
    _raw_spin_unlock(lock); \
    preempt_enable(); \
    local_bh_enable(); \
    __release(lock); \
} while (0)

#define _write_unlock_bh(lock) \
do { \
    _raw_write_unlock(lock); \
    preempt_enable(); \
    local_bh_enable(); \
    __release(lock); \
} while (0)

#define _read_unlock_irqrestore(lock, flags) \
do { \
    _raw_read_unlock(lock); \
    local_irq_restore(flags); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#define _write_unlock_irqrestore(lock, flags) \
do { \
    _raw_write_unlock(lock); \
    local_irq_restore(flags); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#define _read_unlock_irq(lock)    \
do { \
    _raw_read_unlock(lock); \
    local_irq_enable(); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#define _read_unlock_bh(lock)    \
do { \
    _raw_read_unlock(lock); \
    local_bh_enable(); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#define _write_unlock_irq(lock)    \
do { \
    _raw_write_unlock(lock); \
    local_irq_enable(); \
    preempt_enable(); \
    __release(lock); \
} while (0)

#endif /* !SMP */
#ifdef __KERNEL__

/*
 * Define the various spin_lock and rw_lock methods.  Note that we
 * define these regardless of whether CONFIG_SMP or CONFIG_PREEMPT is
 * set; the various methods are defined as nops where they are not
 * required.
 */
#define spin_trylock(lock)    __cond_lock(_spin_trylock(lock))
#define read_trylock(lock)    __cond_lock(_read_trylock(lock))
#define write_trylock(lock)    __cond_lock(_write_trylock(lock))

#define spin_lock(lock)        _spin_lock(lock)
#define write_lock(lock)    _write_lock(lock)
#define read_lock(lock)        _read_lock(lock)
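
/*
 * A minimal usage sketch (illustrative only; "my_lock" and the data it
 * protects are made up for this example):
 *
 *    static DEFINE_SPINLOCK(my_lock);
 *
 *    spin_lock(&my_lock);
 *    ... touch data shared with other CPUs/contexts ...
 *    spin_unlock(&my_lock);
 */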

#ifdef CONFIG_SMP
#define spin_lock_irqsave(lock, flags)    flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)    flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags)    flags = _write_lock_irqsave(lock)
#else
#define spin_lock_irqsave(lock, flags)    _spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)    _read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags)    _write_lock_irqsave(lock, flags)
#endif
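
/*
 * Illustrative sketch only: note that flags is a plain unsigned long
 * passed by name (on SMP the macro assigns to it), never by pointer:
 *
 *    unsigned long flags;
 *
 *    spin_lock_irqsave(&my_lock, flags);
 *    ... critical section, safe against local interrupts ...
 *    spin_unlock_irqrestore(&my_lock, flags);
 */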

#define spin_lock_irq(lock)        _spin_lock_irq(lock)
#define spin_lock_bh(lock)        _spin_lock_bh(lock)

#define read_lock_irq(lock)        _read_lock_irq(lock)
#define read_lock_bh(lock)        _read_lock_bh(lock)

#define write_lock_irq(lock)        _write_lock_irq(lock)
#define write_lock_bh(lock)        _write_lock_bh(lock)

#define spin_unlock(lock)    _spin_unlock(lock)
#define write_unlock(lock)    _write_unlock(lock)
#define read_unlock(lock)    _read_unlock(lock)

#define spin_unlock_irqrestore(lock, flags)    _spin_unlock_irqrestore(lock, flags)
#define spin_unlock_irq(lock)        _spin_unlock_irq(lock)
#define spin_unlock_bh(lock)        _spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)    _read_unlock_irqrestore(lock, flags)
#define read_unlock_irq(lock)            _read_unlock_irq(lock)
#define read_unlock_bh(lock)            _read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)    _write_unlock_irqrestore(lock, flags)
#define write_unlock_irq(lock)            _write_unlock_irq(lock)
#define write_unlock_bh(lock)            _write_unlock_bh(lock)

#define spin_trylock_bh(lock)            __cond_lock(_spin_trylock_bh(lock))

#define spin_trylock_irq(lock) \
({ \
    local_irq_disable(); \
    _spin_trylock(lock) ? \
    1 : ({local_irq_enable(); 0; }); \
})

#define spin_trylock_irqsave(lock, flags) \
({ \
    local_irq_save(flags); \
    _spin_trylock(lock) ? \
    1 : ({local_irq_restore(flags); 0;}); \
})
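
/*
 * Illustrative sketch only: trylock callers must handle failure and
 * must not touch the protected data when 0 is returned:
 *
 *    if (spin_trylock(&my_lock)) {
 *        ... critical section ...
 *        spin_unlock(&my_lock);
 *    } else {
 *        ... back off and retry, or take a slow path ...
 *    }
 */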

#ifdef CONFIG_LOCKMETER
extern void _metered_spin_lock   (spinlock_t *lock);
extern void _metered_spin_unlock (spinlock_t *lock);
extern int  _metered_spin_trylock(spinlock_t *lock);
extern void _metered_read_lock    (rwlock_t *lock);
extern void _metered_read_unlock  (rwlock_t *lock);
extern void _metered_write_lock   (rwlock_t *lock);
extern void _metered_write_unlock (rwlock_t *lock);
extern int  _metered_read_trylock (rwlock_t *lock);
extern int  _metered_write_trylock(rwlock_t *lock);
#endif

/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
#include <asm/atomic.h>
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#endif

#define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
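
/*
 * Illustrative sketch only ("obj", obj->refcnt, obj->list and
 * obj_list_lock are made up): the lock is taken, and left held for the
 * caller, only when the count drops to zero, which gives the classic
 * teardown pattern:
 *
 *    if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *        list_del(&obj->list);
 *        spin_unlock(&obj_list_lock);
 *        kfree(obj);
 *    }
 */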

/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
    /*
     * Assuming the lock is uncontended, this never enters
     * the body of the outer loop. If it is contended, then
     * within the inner loop a non-atomic test is used to
     * busywait with less bus contention for a good time to
     * attempt to acquire the lock bit.
     */
    preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    while (test_and_set_bit(bitnum, addr)) {
        while (test_bit(bitnum, addr)) {
            preempt_enable();
            cpu_relax();
            preempt_disable();
        }
    }
#endif
    __acquire(bitlock);
}

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
    preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    if (test_and_set_bit(bitnum, addr)) {
        preempt_enable();
        return 0;
    }
#endif
    __acquire(bitlock);
    return 1;
}

/*
 *  bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    BUG_ON(!test_bit(bitnum, addr));
    smp_mb__before_clear_bit();
    clear_bit(bitnum, addr);
#endif
    preempt_enable();
    __release(bitlock);
}

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
    return preempt_count();
#else
    return 1;
#endif
}

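/*
 * Illustrative sketch only ("obj" and its flags word are made up): a
 * bit spinlock borrows a single bit of an existing word, here bit 0 of
 * obj->flags, instead of paying for a whole spinlock_t:
 *
 *    bit_spin_lock(0, &obj->flags);
 *    ... critical section ...
 *    bit_spin_unlock(0, &obj->flags);
 */
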
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED

/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 *
 * Note: the answer is only advisory; the lock may be taken by someone
 * else before the caller can act on it.
 */
#define spin_can_lock(lock)        (!spin_is_locked(lock))

#endif /* __KERNEL__ */
#endif /* __LINUX_SPINLOCK_H */